if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
printk(KERN_ERR PREFIX "Can't find FADT\n");
+#ifdef XEN
+ acpi_dmar_init();
+#endif
+
#ifdef CONFIG_SMP
if (available_cpus == 0) {
printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
redir_num, vector);
return;
}
+ if ( iommu_enabled )
+ {
+ spin_unlock(&viosapic->lock);
+ hvm_dpci_eoi(current->domain, redir_num, &viosapic->redirtbl[redir_num]);
+ spin_lock(&viosapic->lock);
+ }
+
service_iosapic(viosapic);
spin_unlock(&viosapic->lock);
}
#include <asm/shadow.h>
#include <asm/sioemu.h>
#include <public/arch-ia64/sioemu.h>
+#include <xen/hvm/irq.h>
/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
viosapic_set_irq(d, callback_irq, 0);
}
}
+ hvm_dirq_assist(v);
}
rmb();
if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
goto fail_nomem;
+ if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ){
+ if(iommu_domain_init(d) != 0)
+ goto fail_iommu;
+ }
+
/*
* grant_table_create() can't fully initialize grant table for domain
* because it is called before arch_domain_create().
dprintk(XENLOG_DEBUG, "arch_domain_create: domain=%p\n", d);
return 0;
+fail_iommu:
+ iommu_domain_destroy(d);
fail_nomem:
tlb_track_destroy(d);
fail_nomem1:
free_xenheap_pages(d->shared_info,
get_order_from_shift(XSI_SHIFT));
+ if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ) {
+ pci_release_devices(d);
+ iommu_domain_destroy(d);
+ }
+
tlb_track_destroy(d);
/* Clear vTLB for the next domain. */
struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
+/* One-shot timer per IRQ line: armed by __do_IRQ_guest() after it masks
+ * a line that every bound guest already had pending, so the line is
+ * re-enabled shortly afterwards rather than staying masked. */
+static struct timer irq_guest_eoi_timer[NR_IRQS];
+
+/* Timer callback: drop the IRQ_INPROGRESS state set by __do_IRQ_guest()
+ * and unmask the line again.  @data is the irq_desc[] entry for the IRQ. */
+static void irq_guest_eoi_timer_fn(void *data)
+{
+ irq_desc_t *desc = data;
+ unsigned vector = desc - irq_desc; /* pointer difference == index into irq_desc[] */
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ desc->status &= ~IRQ_INPROGRESS;
+ desc->handler->enable(vector);
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
/*
 * Deliver a physical IRQ to every guest bound to it.  IRQs routed through
 * the VT-d passthrough path go via hvm_do_IRQ_dpci() and are EOI'd later
 * by hvm_dpci_eoi(); others are signalled as event-channel pirqs with
 * send_guest_pirq().
 *
 * NOTE(review): the per-iteration assignment of 'd' is elided in this
 * hunk; presumably d = action->guest[i] in the omitted context -- confirm.
 */
void __do_IRQ_guest(int irq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
struct domain *d;
-    int i;
+    /* already_pending counts guests for which this IRQ was already
+     * pending and therefore could not be (re-)delivered. */
+    int i, already_pending = 0;
for ( i = 0; i < action->nr_guests; i++ )
{
if ( (action->ack_type != ACKTYPE_NONE) &&
!test_and_set_bit(irq, &d->pirq_mask) )
action->in_flight++;
-        send_guest_pirq(d, irq);
-    }
+        /* Passthrough (VT-d) delivery: the EOI arrives later through
+         * hvm_dpci_eoi(), which is when IRQ_INPROGRESS gets cleared. */
+        if ( hvm_do_IRQ_dpci(d, irq) )
+        {
+            if ( action->ack_type == ACKTYPE_NONE )
+            {
+                already_pending += !!(desc->status & IRQ_INPROGRESS);
+                desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
+            }
+        }
+        else if ( send_guest_pirq(d, irq) &&
+                  (action->ack_type == ACKTYPE_NONE) )
+        {
+            already_pending++;
+        }
+    }
+
+    /* Every guest already had this IRQ pending, so no new notification
+     * went out: mask the line and arm a 1ms timer (handled by
+     * irq_guest_eoi_timer_fn) to unmask it, so the line does not fire
+     * continuously in the meantime.  init_timer on each arm re-binds
+     * the timer to the current CPU. */
+    if ( already_pending == action->nr_guests )
+    {
+        desc->handler->disable(irq);
+        stop_timer(&irq_guest_eoi_timer[irq]);
+        init_timer(&irq_guest_eoi_timer[irq],
+                   irq_guest_eoi_timer_fn, desc, smp_processor_id());
+        set_timer(&irq_guest_eoi_timer[irq], NOW() + MILLISECS(1));
+    }
}
int pirq_acktype(int irq)
if (mfn == INVALID_MFN) {
// clear pte
old_pte = ptep_get_and_clear(mm, mpaddr, pte);
+ if(!pte_mem(old_pte))
+ return;
mfn = pte_pfn(old_pte);
} else {
unsigned long old_arflags;
if(!mfn_valid(mfn))
return;
+ if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ){
+ int i, j;
+ j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
+ for(i = 0 ; i < j; i++)
+ iommu_unmap_page(d, (mpaddr>>PAGE_SHIFT)*j + i);
+ }
+
page = mfn_to_page(mfn);
BUG_ON((page->count_info & PGC_count_mask) == 0);
smp_mb();
assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn,
ASSIGN_writable | ASSIGN_pgc_allocated);
+ if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ){
+ int i, j;
+ j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
+ for(i = 0 ; i < j; i++)
+ iommu_map_page(d, gpfn*j + i, mfn*j + i);
+ }
}
int
#include <asm/numa.h>
#ifdef XEN
#include <xen/nodemask.h>
+extern int acpi_dmar_init(void);
#endif
#define COMPILER_DEPENDENT_INT64 long
/*
 * Read an IOSAPIC register through its indirect select/window pair.
 * Under Xen with the IOMMU enabled, reads of redirection-table entries
 * are answered from the interrupt-remapping layer instead of the raw
 * hardware RTE.
 */
static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
{
+#ifdef XEN
+ /* NOTE(review): RTEs start at register 0x10; 'reg >= 10' (decimal)
+  * also matches reserved registers 10-15 -- confirm this is intended. */
+ if(iommu_enabled && (reg >= 10)){
+ int apic = find_iosapic_by_addr((unsigned long)iosapic);
+ return io_apic_read_remap_rte(apic, reg);
+ }
+#endif
writel(reg, iosapic + IOSAPIC_REG_SELECT);
return readl(iosapic + IOSAPIC_WINDOW);
}
/*
 * Write an IOSAPIC register through its indirect select/window pair.
 * Under Xen with the IOMMU enabled, writes to redirection-table entries
 * are diverted into the interrupt-remapping layer, which programs the
 * remapped entry on our behalf.
 */
static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
+#ifdef XEN
+ /* NOTE(review): same decimal-vs-0x10 question as iosapic_read() --
+  * 'reg >= 10' also covers reserved registers 10-15; confirm intended. */
+ if (iommu_enabled && (reg >= 10)){
+ int apic = find_iosapic_by_addr((unsigned long)iosapic);
+ iommu_update_ire_from_apic(apic, reg, val);
+ return;
+ }
+#endif
writel(reg, iosapic + IOSAPIC_REG_SELECT);
writel(val, iosapic + IOSAPIC_WINDOW);
}
unsigned long viosapic_read(struct vcpu *v, unsigned long addr,
unsigned long length);
+void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
+ union vioapic_redir_entry *ent);
#endif /* __ASM_IA64_VMX_VIOSAPIC_H__ */